tp->dev->hard_start_xmit = tg3_start_xmit;
tp->rx_offset = 2;
-/* XXX Xen: we trust our ASICs, for better or worse ;-) */
-#if 0
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
tp->rx_offset = 0;
-#endif
/* By default, disable wake-on-lan. User can change this
* using ETHTOOL_SWOL.
#include <linux/types.h>
typedef struct tx_entry_st {
- unsigned long addr; /* machine address of packet */
- unsigned short size; /* in bytes */
- unsigned short status; /* per descriptor status. */
+ unsigned long addr; /* machine address of packet (IN VAR) */
+ unsigned short size; /* in bytes (IN VAR) */
+ unsigned char status; /* per descriptor status (OUT VAR) */
+ unsigned char _unused;
} tx_entry_t;
typedef struct rx_entry_st {
- unsigned long addr; /* machine address of PTE to swizzle */
- unsigned short size; /* in bytes */
- unsigned short status; /* per descriptor status. */
+ unsigned long addr; /* machine address of PTE to swizzle (IN VAR) */
+ unsigned short size; /* in bytes (OUT VAR) */
+ unsigned char status; /* per descriptor status (OUT VAR) */
+ unsigned char offset; /* offset in page of received pkt (OUT VAR) */
} rx_entry_t;
#define TX_RING_SIZE 256
/* Drop a new rule down to the network tables. */
int add_net_rule(net_rule_t *rule);
-
-/* Descriptor status values:
- */
-
-#define RING_STATUS_OK 0 // Everything is gravy.
-#define RING_STATUS_ERR_CFU -1 // Copy from user problems.
-#define RING_STATUS_BAD_PAGE -2 // What they gave us was pure evil.
+/* Descriptor status values */
+#define RING_STATUS_OK 0 /* Everything is gravy. */
+#define RING_STATUS_ERR_CFU 1 /* Copy from user problems. */
+#define RING_STATUS_BAD_PAGE 2 /* What they gave us was pure evil. */
#endif
typedef struct rx_shadow_entry_st {
unsigned long addr;
unsigned short size;
- unsigned short status;
+ unsigned char status;
+ unsigned char offset;
unsigned long flush_count;
} rx_shadow_entry_t;
void *header;
unsigned long payload;
unsigned short size;
- unsigned short status;
+ unsigned char status;
+ unsigned char _unused;
} tx_shadow_entry_t;
typedef struct net_shadow_ring_st {
}
rx = shadow_ring->rx_ring + i;
- if ( (skb->len + ETH_HLEN) < rx->size )
- rx->size = skb->len + ETH_HLEN;
-
+ ASSERT(skb->len <= PAGE_SIZE);
+ rx->size = skb->len;
+ rx->offset = (unsigned char)((unsigned long)skb->data & ~PAGE_MASK);
+
spin_lock_irqsave(&vif->domain->page_lock, flags);
g_pte = map_domain_mem(rx->addr);
int netif_rx(struct sk_buff *skb)
{
unsigned long cpu_mask;
- int this_cpu = smp_processor_id();
+ int offset, this_cpu = smp_processor_id();
unsigned long flags;
net_vif_t *vif;
ASSERT(skb->skb_type == SKB_ZERO_COPY);
ASSERT((skb->data - skb->head) == (18 + ETH_HLEN));
-
+
+ /*
+ * Offset will include 16 bytes padding from dev_alloc_skb, 14 bytes for
+ * ethernet header, plus any other alignment padding added by the driver.
+ */
+ offset = (int)skb->data & ~PAGE_MASK;
skb->head = (u8 *)map_domain_mem(((skb->pf - frame_table) << PAGE_SHIFT));
- skb->data = skb->head;
- skb_reserve(skb,18); /* 18 is the 16 from dev_alloc_skb plus 2 for
- IP header alignment. */
+ skb->data = skb->nh.raw = skb->head + offset;
+ skb->tail = skb->data + skb->len;
+ skb_push(skb, ETH_HLEN);
skb->mac.raw = skb->data;
- skb->data += ETH_HLEN;
- skb->nh.raw = skb->data;
-
+
netdev_rx_stat[this_cpu].total++;
if ( skb->src_vif == VIF_UNKNOWN_INTERFACE )
skb->src_vif = VIF_PHYSICAL_INTERFACE;
if ( skb->dst_vif == VIF_UNKNOWN_INTERFACE )
- skb->dst_vif = __net_get_target_vif(skb->mac.raw,
- skb->len, skb->src_vif);
+ skb->dst_vif = __net_get_target_vif(skb->data, skb->len, skb->src_vif);
if ( ((vif = sys_vif_list[skb->dst_vif]) == NULL) ||
(skb->dst_vif <= VIF_PHYSICAL_INTERFACE) )
* This copy assumes that rx_shadow_entry_t is an extension of
 * rx_net_entry_t; extra fields must be tacked on to the end.
*/
- if ( copy_from_user( shadow_ring->rx_ring+i, net_ring->rx_ring+i,
- sizeof (rx_entry_t) ) )
+ if ( copy_from_user(shadow_ring->rx_ring+i, net_ring->rx_ring+i,
+ sizeof (rx_entry_t) ) )
{
DPRINTK("Bad copy_from_user for rx ring\n");
shadow_ring->rx_ring[i].status = RING_STATUS_ERR_CFU;
skb = dev_alloc_skb(RX_BUF_SIZE);
if ( skb == NULL ) break;
skb->dev = dev;
- skb_reserve(skb, 2); /* word align the IP header */
np->rx_skb_ring[i] = skb;
np->net_ring->rx_ring[i].addr = get_ppte((unsigned long)skb->head);
np->net_ring->rx_ring[i].size = RX_BUF_SIZE - 16; /* arbitrary */
struct net_device *dev = (struct net_device *)dev_id;
struct net_private *np = dev->priv;
struct sk_buff *skb;
+ rx_entry_t *rx;
again:
for ( i = np->rx_idx; i != np->net_ring->rx_cons; i = RX_RING_INC(i) )
{
- if (np->net_ring->rx_ring[i].status != RING_STATUS_OK)
+ rx = &np->net_ring->rx_ring[i];
+ skb = np->rx_skb_ring[i];
+
+ if ( rx->status != RING_STATUS_OK )
{
- printk(KERN_ALERT "bad buffer on RX ring!(%d)\n",
- np->net_ring->rx_ring[i].status);
+ printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
+ dev_kfree_skb_any(skb);
continue;
}
- skb = np->rx_skb_ring[i];
-
- phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] =
- (*(unsigned long *)phys_to_virt(
- machine_to_phys(np->net_ring->rx_ring[i].addr))
- ) >> PAGE_SHIFT;
-
- skb_put(skb, np->net_ring->rx_ring[i].size);
- skb->protocol = eth_type_trans(skb, dev);
/*
 * Set up shinfo -- from alloc_skb. This was particularly nasty: the
skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->frag_list = NULL;
+ phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] =
+ (*(unsigned long *)phys_to_virt(machine_to_phys(rx->addr))
+ ) >> PAGE_SHIFT;
+
+ if ( rx->offset < 16 )
+ {
+ printk(KERN_ALERT "need pkt offset >= 16 (got %d)\n", rx->offset);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_reserve(skb, rx->offset - 16);
+
+ skb_put(skb, np->net_ring->rx_ring[i].size);
+ skb->protocol = eth_type_trans(skb, dev);
+
np->stats.rx_packets++;
np->stats.rx_bytes += np->net_ring->rx_ring[i].size;